/* hardware-assisted paging bits */
extern int opt_hap_enabled;
-static inline void svm_inject_exception(struct vcpu *v, int trap,
+static void svm_inject_exception(struct vcpu *v, int trap,
int ev, int error_code)
{
eventinj_t event;
write_efer(read_efer() & ~EFER_SVME);
}
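+
+/*
+ * Predicates over the guest's shadowed CR0/CR4/EFER state.  The long-mode
+ * queries are stubbed out to 0 on 32-bit hosts below.
+ */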
+#ifdef __x86_64__
+
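+/* Guest has set EFER.LME (long mode enabled, not necessarily active). */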
+static int svm_lme_is_set(struct vcpu *v)
+{
+ u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ return guest_efer & EFER_LME;
+}
+
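+/* Is the guest actually executing in long mode, i.e. is EFER.LMA set? */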
+static int svm_long_mode_enabled(struct vcpu *v)
+{
+ u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ return guest_efer & EFER_LMA;
+}
+
+#else /* __i386__ */
+
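+/* Long mode does not exist on 32-bit hosts. */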
+static int svm_lme_is_set(struct vcpu *v)
+{ return 0; }
+static int svm_long_mode_enabled(struct vcpu *v)
+{ return 0; }
+
+#endif
+
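+/* Has the guest set CR4.PAE? */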
+static int svm_cr4_pae_is_set(struct vcpu *v)
+{
+ unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+ return guest_cr4 & X86_CR4_PAE;
+}
+
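+/* Is guest paging active, i.e. are both CR0.PE and CR0.PG set? */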
+static int svm_paging_enabled(struct vcpu *v)
+{
+ unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+ return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
+}
+
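+/* Is the guest using PAE paging: paging active with CR4.PAE set? */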
+static int svm_pae_enabled(struct vcpu *v)
+{
+ unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+ return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
+}
+
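+/* Has the guest enabled no-execute protection via EFER.NX? */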
+static int svm_nx_enabled(struct vcpu *v)
+{
+ return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
+}
+
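+/* Raw test of CR0.PG, unlike svm_paging_enabled() which also requires CR0.PE. */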
+static int svm_pgbit_test(struct vcpu *v)
+{
+ return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
+}
+
static void svm_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
}
}
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
+static int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
u32 ecx = regs->ecx;
goto gp_fault;
}
-#ifdef __x86_64__
if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
{
/* EFER.LME transition from 0 to 1. */
goto gp_fault;
}
}
-#endif /* __x86_64__ */
v->arch.hvm_svm.cpu_shadow_efer = msr_content;
vmcb->efer = msr_content | EFER_SVME;
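+/* Save hardware debug register %db<_reg> into the vcpu's debugreg array. */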
#define savedebug(_v,_reg) \
    asm volatile ("mov %%db" #_reg ",%0" : "=r" ((_v)->debugreg[_reg]))
-static inline void svm_save_dr(struct vcpu *v)
+static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
}
-static inline void __restore_debug_registers(struct vcpu *v)
+static void __restore_debug_registers(struct vcpu *v)
{
loaddebug(&v->arch.guest_context, 0);
loaddebug(&v->arch.guest_context, 1);
return 0;
}
-static inline void svm_restore_dr(struct vcpu *v)
+static void svm_restore_dr(struct vcpu *v)
{
if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
__restore_debug_registers(v);
static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int long_mode = 0;
+ int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#ifdef __x86_64__
- long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
switch ( seg )
{
case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
__update_guest_eip(vmcb, inst_len);
}
-static inline unsigned long *get_reg_p(
+static unsigned long *get_reg_p(
unsigned int gpreg,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
}
-static inline unsigned long get_reg(
+static unsigned long get_reg(
unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
unsigned long *gp;
}
-static inline void set_reg(
+static void set_reg(
unsigned int gpreg, unsigned long value,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
/* Get the address of INS/OUTS instruction */
-static inline int svm_get_io_address(
+static int svm_get_io_address(
struct vcpu *v, struct cpu_user_regs *regs,
unsigned int size, ioio_info_t info,
unsigned long *count, unsigned long *addr)
svm_segment_register_t *seg = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-#ifdef __x86_64__
- /* If we're in long mode, we shouldn't check the segment presence & limit */
+ /* If we're in long mode, don't check the segment presence & limit */
long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
/* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
* l field combined with EFER_LMA says whether it's 16 or 64 bit.
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
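+        /* Paging is being enabled: if EFER.LME is set, long mode becomes active. */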
-#if defined(__x86_64__)
if ( svm_lme_is_set(v) )
{
if ( !svm_cr4_pae_is_set(v) )
v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}
-#endif /* __x86_64__ */
if ( !paging_mode_hap(v->domain) )
{
return result;
}
-static inline void svm_do_msr_access(
+static void svm_do_msr_access(
struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
__update_guest_eip(vmcb, inst_len);
}
-static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-static inline int svm_long_mode_enabled(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LMA;
-}
-
-static inline int svm_lme_is_set(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LME;
-}
-
-static inline int svm_cr4_pae_is_set(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return guest_cr4 & X86_CR4_PAE;
-}
-
-static inline int svm_paging_enabled(struct vcpu *v)
-{
- unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static inline int svm_pae_enabled(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static inline int svm_nx_enabled(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static inline int svm_pgbit_test(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
-}
-
#define SVM_REG_EAX (0)
#define SVM_REG_ECX (1)
#define SVM_REG_EDX (2)